#import necessary libraries
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import cv2
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from keras.applications.mobilenet import MobileNet
from keras.applications.vgg16 import VGG16
from keras.applications.vgg19 import VGG19
from keras.applications.vgg19 import preprocess_input
import os, glob
from tensorflow.keras.preprocessing import image
# Confirm which TensorFlow version this Colab runtime provides.
print(tf.__version__)
# Mount Google Drive so the dataset/model paths under /content/drive resolve.
from google.colab import drive
drive.mount('/content/drive')
Object Detection using Haar feature-based cascade classifiers is an effective object detection method proposed by Paul Viola and Michael Jones in their paper, "Rapid Object Detection using a Boosted Cascade of Simple Features" in 2001. It is a machine learning based approach where a cascade function is trained from a lot of positive and negative images. It is then used to detect objects in other images. We'll be using a Haar Cascade Model trained to detect faces in order to obtain the bounding box coordinates of faces in an image.
# Load the Haar cascade trained for frontal faces from the mounted Drive.
#face_model = cv2.CascadeClassifier('/content/drive/MyDrive/haarcascades/haarcascade_frontalface_default.xml')
face_model = cv2.CascadeClassifier('/content/drive/MyDrive/Machine Learning/haarcascades/haarcascade_frontalface_default.xml')
#img = cv2.imread('/content/drive/MyDrive/Mask_Data/images/maksssksksss244.png')
img = cv2.imread('/content/drive/MyDrive/Machine Learning/Face Mask/images/maksssksksss244.png')
# BUG FIX: cv2.IMREAD_GRAYSCALE is an imread() flag (value 0), not a cvtColor
# conversion code; passing it to cvtColor silently selects COLOR_BGR2BGRA.
# Detect on a true grayscale copy and keep the original image for display.
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_model.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=4)  # returns a list of (x,y,w,h) tuples
out_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # RGB copy for matplotlib display
for (x, y, w, h) in faces:
    cv2.rectangle(out_img, (x, y), (x + w, y + h), (0, 0, 255), 1)
plt.figure(figsize=(12, 12))
plt.imshow(out_img)
In our project, we will use Haar feature-based cascade classifiers to detect faces and then train image classification models to identify masks.
# Data augmentation with shear_range, zoom_range and horizontal_flip.
#base_dir = '/content/drive/MyDrive/Face Mask Dataset'
base_dir = '/content/drive/MyDrive/Machine Learning/Face Mask Dataset'
train_gen = keras.preprocessing.image.ImageDataGenerator(rescale=1/255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True)
train_generator = train_gen.flow_from_directory(directory=base_dir + '/Train',
                                                target_size=(128, 128),
                                                class_mode='categorical',
                                                batch_size=32)
# Invert class_indices ({name: index}) so class names can be looked up by index.
class_names = {v: k for k, v in train_generator.class_indices.items()}
images, labels = next(iter(train_generator))
plt.figure(figsize=(10, 10))
for i in range(15):
    plt.subplot(5, 5, i + 1)
    plt.imshow(images[i])
    plt.xticks([])
    plt.yticks([])
    # FIX: take the argmax of the one-hot vector. The original indexed
    # labels[i][1], which only happens to be correct for exactly two classes.
    plt.xlabel(class_names[int(np.argmax(labels[i]))])
plt.show()
# Absolute Drive paths for each split/class directory.
mask_train_dir = '/content/drive/MyDrive/Machine Learning/Face Mask Dataset/Train/WithMask'
no_mask_train_dir = '/content/drive/MyDrive/Machine Learning/Face Mask Dataset/Train/WithoutMask'
mask_valid_dir = '/content/drive/MyDrive/Machine Learning/Face Mask Dataset/Validation/WithMask'
no_mask_valid_dir = '/content/drive/MyDrive/Machine Learning/Face Mask Dataset/Validation/WithoutMask'
mask_test_dir = '/content/drive/MyDrive/Machine Learning/Face Mask Dataset/Test/WithMask'
no_mask_test_dir = '/content/drive/MyDrive/Machine Learning/Face Mask Dataset/Test/WithoutMask'
dirlist = [mask_train_dir, no_mask_train_dir, mask_valid_dir, no_mask_valid_dir, mask_test_dir, no_mask_test_dir]
classes = ['mask', 'no_mask', 'mask', 'no_mask', 'mask', 'no_mask']
# Collect every image path together with its class label.
filepaths = []
labels = []
for d, c in zip(dirlist, classes):
    for f in os.listdir(d):
        filepaths.append(os.path.join(d, f))
        labels.append(c)
print ('filepaths: ', len(filepaths), ' labels: ', len(labels))
#check the labels
Fseries = pd.Series(filepaths, name='file_paths')
Lseries = pd.Series(labels, name='labels')
# FIX: removed the hard-coded reshape(11801, 2), which crashed whenever the
# number of files on disk changed; the concat already yields the right shape.
df = pd.concat([Fseries, Lseries], axis=1)
print(df['labels'].value_counts())
# Hold out 10% of the data for testing, then 10% of the remainder for
# validation; both splits shuffle first.
train_df, test_df = train_test_split(df, train_size=0.9, shuffle=True)
train_df, valid_df = train_test_split(train_df, train_size=0.9, shuffle=True)
# Show the class balance of each split.
for split_frame in (train_df, valid_df, test_df):
    print(split_frame.labels.value_counts())
# Shared loader settings for the three flow_from_dataframe generators below.
target_size=(128,128)
batch_size=32
# NOTE(review): inception_resnet_v2.preprocess_input rescales pixels to
# [-1, 1], but the models trained below are plain CNNs, MobileNet, VGG16 and
# VGG19, each of which has its own canonical preprocess_input — confirm this
# preprocessing choice is intentional.
train_datagen = keras.preprocessing.image.ImageDataGenerator(
preprocessing_function = tf.keras.applications.inception_resnet_v2.preprocess_input,
zoom_range=0.2,
rotation_range=40)
# Validation/test data gets the same preprocessing but no augmentation.
test_datagen = keras.preprocessing.image.ImageDataGenerator(
preprocessing_function = tf.keras.applications.inception_resnet_v2.preprocess_input)
train_gen = train_datagen.flow_from_dataframe(
train_df,
x_col='file_paths',
y_col='labels',
target_size=target_size,
batch_size=batch_size,
color_mode='rgb',
class_mode='categorical')
valid_gen = test_datagen.flow_from_dataframe(
valid_df,
x_col='file_paths',
y_col='labels',
target_size=target_size,
batch_size=batch_size,
color_mode='rgb',
class_mode='categorical')
test_gen = test_datagen.flow_from_dataframe(
test_df,
x_col='file_paths',
y_col='labels',
target_size=target_size,
batch_size=batch_size,
color_mode='rgb',
class_mode='categorical')
# Inspect the learned class -> index mapping and the batch image shape.
train_gen.class_indices
train_gen.image_shape
# Bar charts of the label counts in each split, side by side.
plt.figure(figsize=(15, 6))
panels = [(train_df, "Train_df"), (valid_df, "Validation_df"), (test_df, "Test_df")]
for pos, (frame, title) in enumerate(panels, start=1):
    plt.subplot(1, 3, pos)
    sns.countplot(frame["labels"], color='lightblue')
    plt.title(title, size=14)
plt.show()
A Convolutional Neural Network (ConvNet/CNN) is a Deep Learning algorithm which can take in an input image, assign importance (learnable weights and biases) to various aspects/objects in the image and be able to differentiate one from the other.
# CNN model 1: all sigmoid activations (baseline for the activation comparison).
CNN_model1 = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(32, (3, 3), activation='sigmoid', input_shape=(128, 128, 3)),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(64, (3, 3), activation='sigmoid'),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(64, (3, 3), activation='sigmoid'),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation='sigmoid'),
    tf.keras.layers.Dense(2, activation='sigmoid')
])
CNN_model1.summary()
# metrics takes a list; a bare string works by accident but the list form is
# the documented API.
CNN_model1.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# FIX: Model.fit_generator is deprecated (removed in recent TF releases);
# Model.fit accepts generators directly.
history_CNN1 = CNN_model1.fit(train_gen,
                              validation_data=valid_gen,
                              epochs=10,
                              steps_per_epoch=10,
                              validation_steps=10)
# Accuracy (top) and loss (bottom) curves for CNN model 1.
acc = history_CNN1.history['accuracy']
val_acc = history_CNN1.history['val_accuracy']
loss = history_CNN1.history['loss']
val_loss = history_CNN1.history['val_loss']
plt.figure(figsize=(8, 8))
plt.subplot(2, 1, 1)
for curve, label_text in ((acc, 'Training Accuracy'), (val_acc, 'Validation Accuracy')):
    plt.plot(curve, label=label_text)
plt.legend(loc='upper right')
plt.ylabel('Accuracy')
plt.ylim([min(plt.ylim()), 1])
plt.title('Training and Validation Accuracy')
plt.subplot(2, 1, 2)
for curve, label_text in ((loss, 'Training Loss'), (val_loss, 'Validation Loss')):
    plt.plot(curve, label=label_text)
plt.legend(loc='upper right')
plt.ylabel('Cross Entropy')
plt.ylim([0, 2.0])
plt.title('Training and Validation Loss')
plt.xlabel('epoch')
plt.show()
import time

# Time inference over the whole test set, then score it.
start = time.time()
prediction1 = CNN_model1.predict(test_gen)
end = time.time()
CNN_test_time1 = end - start
CNN_test_time1
# FIX: evaluate_generator is deprecated/removed; evaluate handles generators.
CNN_test_loss1, CNN_test_acc1 = CNN_model1.evaluate(test_gen)
CNN_test_loss1, CNN_test_acc1
CNN_model1.save('cnn1.h5')
# CNN model 2: all relu activations, sigmoid on the final layer.
CNN_model2 = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(128, 128, 3)),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(2, activation='sigmoid')
])
CNN_model2.summary()
CNN_model2.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# FIX: fit_generator is deprecated; fit accepts generators directly.
history_CNN2 = CNN_model2.fit(train_gen,
                              validation_data=valid_gen,
                              epochs=10,
                              steps_per_epoch=10,
                              validation_steps=10)
# Accuracy (top) and loss (bottom) curves for CNN model 2.
acc = history_CNN2.history['accuracy']
val_acc = history_CNN2.history['val_accuracy']
loss = history_CNN2.history['loss']
val_loss = history_CNN2.history['val_loss']
plt.figure(figsize=(8, 8))
plt.subplot(2, 1, 1)
for curve, label_text in ((acc, 'Training Accuracy'), (val_acc, 'Validation Accuracy')):
    plt.plot(curve, label=label_text)
plt.legend(loc='lower right')
plt.ylabel('Accuracy')
plt.ylim([min(plt.ylim()), 1])
plt.title('Training and Validation Accuracy')
plt.subplot(2, 1, 2)
for curve, label_text in ((loss, 'Training Loss'), (val_loss, 'Validation Loss')):
    plt.plot(curve, label=label_text)
plt.legend(loc='upper right')
plt.ylabel('Cross Entropy')
plt.ylim([0, 1.0])
plt.title('Training and Validation Loss')
plt.xlabel('epoch')
plt.show()
# Time inference over the test set, then score it.
start = time.time()
prediction2 = CNN_model2.predict(test_gen)
end = time.time()
CNN_test_time2 = end - start
CNN_test_time2
# FIX: evaluate_generator is deprecated/removed; evaluate handles generators.
CNN_test_loss2, CNN_test_acc2 = CNN_model2.evaluate(test_gen)
CNN_test_loss2, CNN_test_acc2
CNN_model2.save('cnn2.h5')
# CNN model 3: model 2 plus dropout layers, which regularize the network and
# reduce overfitting.
CNN_model3 = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(128, 128, 3)),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Dropout(0.25),
    tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Dropout(0.3),
    tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Dropout(0.3),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(2, activation='sigmoid')
])
CNN_model3.summary()
CNN_model3.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# FIX: fit_generator is deprecated; fit accepts generators directly.
history_CNN3 = CNN_model3.fit(train_gen,
                              validation_data=valid_gen,
                              epochs=10,
                              steps_per_epoch=10,
                              validation_steps=10)
# Accuracy (top) and loss (bottom) curves for CNN model 3.
acc = history_CNN3.history['accuracy']
val_acc = history_CNN3.history['val_accuracy']
loss = history_CNN3.history['loss']
val_loss = history_CNN3.history['val_loss']
plt.figure(figsize=(8, 8))
plt.subplot(2, 1, 1)
for curve, label_text in ((acc, 'Training Accuracy'), (val_acc, 'Validation Accuracy')):
    plt.plot(curve, label=label_text)
plt.legend(loc='lower right')
plt.ylabel('Accuracy')
plt.ylim([min(plt.ylim()), 1])
plt.title('Training and Validation Accuracy')
plt.subplot(2, 1, 2)
for curve, label_text in ((loss, 'Training Loss'), (val_loss, 'Validation Loss')):
    plt.plot(curve, label=label_text)
plt.legend(loc='upper right')
plt.ylabel('Cross Entropy')
plt.ylim([0, 1.0])
plt.title('Training and Validation Loss')
plt.xlabel('epoch')
plt.show()
# Time inference over the test set, then score it.
start = time.time()
prediction3 = CNN_model3.predict(test_gen)
end = time.time()
CNN_test_time3 = end - start
CNN_test_time3
# FIX: evaluate_generator is deprecated/removed; evaluate handles generators.
CNN_test_loss3, CNN_test_acc3 = CNN_model3.evaluate(test_gen)
CNN_test_loss3, CNN_test_acc3
CNN_model3.save('cnn3.h5')
# CNN 3 (the dropout variant) is the strongest of the three plain CNNs.
CNN_test_accuracy = pd.DataFrame(
    {'Model': ['CNN 1', 'CNN 2', 'CNN 3'],
     'Accuracy': [CNN_test_acc1, CNN_test_acc2, CNN_test_acc3]})
CNN_test_accuracy
MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications
MobileNet uses depthwise separable convolutions. It significantly reduces the number of parameters when compared to the network with regular convolutions with the same depth in the nets. This results in lightweight deep neural networks.
A depthwise separable convolution is made from two operations: a depthwise convolution that filters each input channel separately, followed by a pointwise (1×1) convolution that combines the filtered channels.
# MobileNet: a streamlined architecture built on depthwise-separable
# convolutions, used here as a frozen ImageNet feature extractor.
mobilenet = MobileNet(include_top=False, input_shape=(128, 128, 3))
# Freeze every backbone layer so only the new classification head trains.
for layer in mobilenet.layers:
    layer.trainable = False
mobilenet_model = tf.keras.models.Sequential()
mobilenet_model.add(mobilenet)
mobilenet_model.add(tf.keras.layers.Flatten())
mobilenet_model.add(tf.keras.layers.Dense(2, activation='sigmoid'))
mobilenet_model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# FIX: fit_generator is deprecated; fit accepts generators directly.
history_mobilenet = mobilenet_model.fit(train_gen,
                                        validation_data=valid_gen,
                                        epochs=10,
                                        steps_per_epoch=10,
                                        validation_steps=10)
mobilenet_model.summary()
# Accuracy (top) and loss (bottom) curves for the MobileNet model.
acc = history_mobilenet.history['accuracy']
val_acc = history_mobilenet.history['val_accuracy']
loss = history_mobilenet.history['loss']
val_loss = history_mobilenet.history['val_loss']
plt.figure(figsize=(8, 8))
plt.subplot(2, 1, 1)
for curve, label_text in ((acc, 'Training Accuracy'), (val_acc, 'Validation Accuracy')):
    plt.plot(curve, label=label_text)
plt.legend(loc='lower right')
plt.ylabel('Accuracy')
plt.ylim([min(plt.ylim()), 1])
plt.title('Training and Validation Accuracy')
plt.subplot(2, 1, 2)
for curve, label_text in ((loss, 'Training Loss'), (val_loss, 'Validation Loss')):
    plt.plot(curve, label=label_text)
plt.legend(loc='upper right')
plt.ylabel('Cross Entropy')
plt.ylim([0, 1.0])
plt.title('Training and Validation Loss')
plt.xlabel('epoch')
plt.show()
# Time inference over the test set, then score it.
start = time.time()
prediction4 = mobilenet_model.predict(test_gen)
end = time.time()
mobilenet_test_time = end - start
mobilenet_test_time
# FIX: evaluate_generator is deprecated/removed; evaluate handles generators.
mobilenet_test_loss, mobilenet_test_acc = mobilenet_model.evaluate(test_gen)
mobilenet_test_loss, mobilenet_test_acc
#mobilenet_model.save('mobilenet.h5')
VGG16 is a convolutional neural network model proposed by K. Simonyan and A. Zisserman from the University of Oxford in the paper “Very Deep Convolutional Networks for Large-Scale Image Recognition”. The model achieves 92.7% top-5 test accuracy in ImageNet, which is a dataset of over 14 million images belonging to 1000 classes. 16 refers to the number of layers using same weight.
Layer.trainable: Layers & models also feature a boolean attribute trainable. Its value can be changed. Setting layer.trainable to False moves all the layer's weights from trainable to non-trainable. This is called "freezing" the layer: the state of a frozen layer won't be updated during training (either when training with fit() or when training with any custom loop that relies on trainable_weights to apply gradient updates).
# VGG16 backbone: ImageNet weights, without the ImageNet classifier head.
# FIX: the original downloaded the identical model twice (once as `vgg`, once
# as `vgg16`); load it once and alias the second name for compatibility.
vgg = VGG16(weights='imagenet', include_top=False, input_shape=(128, 128, 3))
vgg.summary()
vgg16 = vgg
# Freeze every backbone layer so only the new head trains.
for layer in vgg16.layers:
    layer.trainable = False
model_vgg16 = tf.keras.models.Sequential()
model_vgg16.add(vgg16)
model_vgg16.add(tf.keras.layers.Flatten())
model_vgg16.add(tf.keras.layers.Dense(2, activation='softmax'))
Early Stopping: a regularization technique that prevents overfitting on the training data by monitoring the validation loss across epochs and halting training when it has stopped improving for the number of epochs set by the patience parameter.
# NOTE(review): with a 2-unit softmax head and one-hot labels,
# 'categorical_crossentropy' is the conventional loss; binary_crossentropy is
# kept only to preserve the original training behavior — confirm.
model_vgg16.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
# Stop early when validation loss fails to improve for 5 epochs, restoring
# the best weights seen.
early_stopping1 = keras.callbacks.EarlyStopping(patience=5, restore_best_weights=True)
# FIX: fit_generator is deprecated; fit accepts generators directly.
history_vgg16 = model_vgg16.fit(train_gen,
                                validation_data=valid_gen,
                                epochs=10,
                                steps_per_epoch=10,
                                validation_steps=10,
                                callbacks=[early_stopping1])
model_vgg16.summary()
# Accuracy (top) and loss (bottom) curves for the VGG16 model.
acc_vgg16 = history_vgg16.history['accuracy']
val_acc_vgg16 = history_vgg16.history['val_accuracy']
loss_vgg16 = history_vgg16.history['loss']
val_loss_vgg16 = history_vgg16.history['val_loss']
plt.figure(figsize=(8, 8))
plt.subplot(2, 1, 1)
for curve, label_text in ((acc_vgg16, 'Training Accuracy'), (val_acc_vgg16, 'Validation Accuracy')):
    plt.plot(curve, label=label_text)
plt.legend(loc='lower right')
plt.ylabel('Accuracy')
plt.ylim([min(plt.ylim()), 1])
plt.title('Training and Validation Accuracy - VGG16')
plt.subplot(2, 1, 2)
for curve, label_text in ((loss_vgg16, 'Training Loss'), (val_loss_vgg16, 'Validation Loss')):
    plt.plot(curve, label=label_text)
plt.legend(loc='upper right')
plt.ylabel('Cross Entropy')
plt.ylim([0, 1.5])
plt.title('Training and Validation Loss - VGG16')
plt.xlabel('epoch')
plt.show()
# Time inference over the test set, then score it.
start = time.time()
prediction5 = model_vgg16.predict(test_gen)
end = time.time()
vgg16_test_time = end - start
vgg16_test_time
# FIX: evaluate_generator is deprecated/removed; evaluate handles generators.
VGG16_test_loss, VGG16_test_acc = model_vgg16.evaluate(test_gen)
VGG16_test_loss, VGG16_test_acc
#model_vgg16.save('vgg16.h5')
VGG19 is a convolutional neural network model that also trained on ImageNet, but it is 19 layers deep and is larger networks with more parameters. It increased depth using an architecture with very small (3x3) convolution filters.
Source: Combinido, J. S., John Robert Mendoza and Jeffrey A. Aborot. “A Convolutional Neural Network Approach for Estimating Tropical Cyclone Intensity Using Satellite-based Infrared Images.” 2018 24th International Conference on Pattern Recognition (ICPR) (2018): 1474-1480.
Source: Setiawan, Wahyudi & Damayanti, Fitri. (2020). Layers Modification of Convolutional Neural Network for Pneumonia Detection. Journal of Physics: Conference Series. 1477. 052055. 10.1088/1742-6596/1477/5/052055.
The most common incarnation of transfer learning in the context of deep learning is the following workflow: take layers from a previously trained model, freeze them, add new trainable layers on top, train those new layers on your dataset, and optionally fine-tune by unfreezing the whole model and retraining it with a very low learning rate.
# VGG19 base model: ImageNet weights, without the classifier head.
vgg19 = VGG19(weights='imagenet', include_top=False, input_shape=(128, 128, 3))
# Freeze the whole backbone for feature extraction.
vgg19.trainable = False
VGG19_model = tf.keras.models.Sequential()
VGG19_model.add(vgg19)
VGG19_model.add(tf.keras.layers.Flatten())
VGG19_model.add(tf.keras.layers.Dense(2, activation='softmax'))
VGG19_model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['binary_accuracy'])
# FIX: fit_generator is deprecated; fit accepts generators directly.
history_VGG19 = VGG19_model.fit(train_gen,
                                validation_data=valid_gen,
                                epochs=10,
                                steps_per_epoch=10,
                                validation_steps=10,
                                callbacks=[early_stopping1])
VGG19_model.summary()
# Accuracy (top) and loss (bottom) curves for the frozen VGG19 model.
acc = history_VGG19.history['binary_accuracy']
val_acc = history_VGG19.history['val_binary_accuracy']
loss = history_VGG19.history['loss']
val_loss = history_VGG19.history['val_loss']
plt.figure(figsize=(8, 8))
plt.subplot(2, 1, 1)
for curve, label_text in ((acc, 'Training Accuracy'), (val_acc, 'Validation Accuracy')):
    plt.plot(curve, label=label_text)
plt.legend(loc='lower right')
plt.ylabel('Accuracy')
plt.ylim([min(plt.ylim()), 1])
plt.title('Training and Validation Accuracy')
plt.subplot(2, 1, 2)
for curve, label_text in ((loss, 'Training Loss'), (val_loss, 'Validation Loss')):
    plt.plot(curve, label=label_text)
plt.legend(loc='upper right')
plt.ylabel('Cross Entropy')
plt.ylim([0, 1.0])
plt.title('Training and Validation Loss')
plt.xlabel('epoch')
plt.show()
# Time inference over the test set, then score it.
start = time.time()
prediction6 = VGG19_model.predict(test_gen)
end = time.time()
vgg19_test_time = end - start
# FIX: evaluate_generator is deprecated/removed; evaluate handles generators.
VGG19_test_loss, VGG19_test_acc = VGG19_model.evaluate(test_gen)
VGG19_test_loss, VGG19_test_acc
#VGG19_model.save('vgg19.h5')
Fine-tuning consists of unfreezing the entire model you obtained above (or part of it), and re-training it on the new data with a very low learning rate. This can potentially achieve meaningful improvements, by incrementally adapting the pretrained features to the new data.
# VGG19 fine-tuning: unfreeze the whole backbone and continue training the
# previously fitted model with a very low learning rate.
vgg19.trainable = True
VGG19_model.compile(
    optimizer=keras.optimizers.Adam(learning_rate=0.00001),
    loss='binary_crossentropy',
    metrics=['binary_accuracy'])
# EarlyStopping: end training when val binary accuracy has not improved for
# 4 consecutive epochs (maximizing), restoring the best weights.
early_stopping = keras.callbacks.EarlyStopping(monitor='val_binary_accuracy', mode='max', patience=4, restore_best_weights=True)
# ReduceLROnPlateau: halve the learning rate after 2 epochs without
# improvement in val binary accuracy.
lr_scheduler = keras.callbacks.ReduceLROnPlateau(monitor='val_binary_accuracy', factor=0.5, patience=2, verbose=1)
# FIX: fit_generator is deprecated; fit accepts generators directly.
history_VGG19_T = VGG19_model.fit(train_gen,
                                  validation_data=valid_gen,
                                  epochs=10,
                                  steps_per_epoch=10,
                                  validation_steps=10,
                                  callbacks=[early_stopping, lr_scheduler])
VGG19_model.summary()
# Accuracy (top) and loss (bottom) curves for the fine-tuned VGG19 model.
acc = history_VGG19_T.history['binary_accuracy']
val_acc = history_VGG19_T.history['val_binary_accuracy']
loss = history_VGG19_T.history['loss']
val_loss = history_VGG19_T.history['val_loss']
plt.figure(figsize=(8, 8))
plt.subplot(2, 1, 1)
for curve, label_text in ((acc, 'Training Accuracy'), (val_acc, 'Validation Accuracy')):
    plt.plot(curve, label=label_text)
plt.legend(loc='lower right')
plt.ylabel('Accuracy')
plt.ylim([0.9, 1])
plt.title('Training and Validation Accuracy')
plt.subplot(2, 1, 2)
for curve, label_text in ((loss, 'Training Loss'), (val_loss, 'Validation Loss')):
    plt.plot(curve, label=label_text)
plt.legend(loc='upper right')
plt.ylabel('Cross Entropy')
plt.ylim([0, 1.0])
plt.title('Training and Validation Loss')
plt.xlabel('epoch')
plt.show()
# Time inference over the test set, then score it.
start = time.time()
prediction7 = VGG19_model.predict(test_gen)
end = time.time()
vgg19_T_test_time = end - start
# FIX: evaluate_generator is deprecated/removed; evaluate handles generators.
VGG19_T_test_loss, VGG19_T_test_acc = VGG19_model.evaluate(test_gen)
VGG19_T_test_loss, VGG19_T_test_acc
#VGG19_model.save('vgg19_T.h5')
# Summary tables: test accuracy and prediction time for all seven models.
model_numbers = ['Model 1', 'Model 2', 'Model 3', 'Model 4', 'Model 5', 'Model 6', 'Model 7']
model_names = ['CNN 1', 'CNN 2', 'CNN 3', 'MobileNet', 'VGG16', 'VGG19', 'VGG19_Tuning']
accuracies = [CNN_test_acc1, CNN_test_acc2, CNN_test_acc3, mobilenet_test_acc, VGG16_test_acc, VGG19_test_acc, VGG19_T_test_acc]
times = [CNN_test_time1, CNN_test_time2, CNN_test_time3, mobilenet_test_time, vgg16_test_time, vgg19_test_time, vgg19_T_test_time]
test_results = pd.DataFrame({'Number': model_numbers, 'Model': model_names, 'Accuracy': accuracies, 'Processing Time': times})
test_results
# Both VGG16 and fine-tuned VGG19 reach the highest accuracy; given its
# shorter processing time, fine-tuned VGG19 outperforms VGG16 overall.
test_time = pd.DataFrame({'Number': model_numbers, 'Model': model_names, 'Processing Time': times})
test_time
# Use the fine-tuned VGG19 as the final model.
best_model = VGG19_model
train_gen.class_indices
#sample_mask_img = cv2.imread('/content/drive/MyDrive/Face Mask Dataset/Test/WithMask/231.png')
sample_mask_img = cv2.imread('/content/drive/MyDrive/Machine Learning/Face Mask Dataset/Test/WithMask/231.png')
sample_mask_img = cv2.resize(sample_mask_img, (128, 128))
plt.imshow(sample_mask_img)
sample_mask_img = np.reshape(sample_mask_img, [1, 128, 128, 3])
# NOTE(review): training preprocessed with inception_resnet_v2 ([-1, 1]);
# /255.0 here is a different scaling — confirm, as the mismatch may hurt
# prediction quality.
sample_mask_img = sample_mask_img / 255.0
# FIX: predict was called twice on the same input; one call suffices.
pred = best_model.predict(sample_mask_img)
predicted_class_indices = np.argmax(pred, axis=1)
# Map the predicted index back to its class name.
labels = {v: k for k, v in train_gen.class_indices.items()}
predictions = [labels[k] for k in predicted_class_indices]
predictions
#sample_mask_img = cv2.imread('/content/drive/MyDrive/Face Mask Dataset/Test/WithoutMask/379.png')
sample_mask_img = cv2.imread('/content/drive/MyDrive/Machine Learning/Face Mask Dataset/Test/WithoutMask/813.png')
sample_mask_img = cv2.resize(sample_mask_img, (128, 128))
plt.imshow(sample_mask_img)
sample_mask_img = np.reshape(sample_mask_img, [1, 128, 128, 3])
# NOTE(review): /255.0 differs from the inception preprocessing used during
# training — confirm the intended scaling.
sample_mask_img = sample_mask_img / 255.0
# FIX: predict was called twice back to back; one call suffices.
pred = best_model.predict(sample_mask_img)
predicted_class_indices = np.argmax(pred, axis=1)
# Map the predicted index back to its class name.
labels = {v: k for k, v in train_gen.class_indices.items()}
predictions = [labels[k] for k in predicted_class_indices]
predictions
# Index -> caption text and box colour used when rendering predictions.
mask_label = {0: 'MASK', 1: 'NO MASK'}
color_label = {0: (0, 255, 0), 1: (255, 0, 0)}
if len(faces) >= 2:
    new_img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)  # colored output image
    for (x, y, w, h) in faces:
        # Classify each detected face crop with the best model.
        crop = cv2.resize(new_img[y:y + h, x:x + w], (128, 128))
        crop = np.reshape(crop, [1, 128, 128, 3]) / 255.0
        mask_result = best_model.predict(crop)
        idx = mask_result.argmax()
        caption = mask_label[idx] + " " + str(round(mask_result.max() * 100, 2)) + "%"
        cv2.putText(img=new_img, text=caption, org=(x, y - 10),
                    fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.4,
                    color=color_label[idx], thickness=1)
        cv2.rectangle(new_img, (x, y), (x + w, y + h), color_label[idx], 1)
    plt.figure(figsize=(10, 10))
    plt.imshow(new_img)
else:
    print("No. of faces detected is less than 2")
# Team photo 1: detect faces with stricter cascade settings.
img = cv2.imread('/content/drive/MyDrive/Machine Learning/Team Photo/1.png')
# BUG FIX: cv2.IMREAD_GRAYSCALE is an imread() flag, not a cvtColor code;
# detect on a true grayscale copy and keep `img` as the original BGR image.
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_model.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=8)  # returns a list of (x,y,w,h) tuples
out_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # RGB copy for matplotlib display
for (x, y, w, h) in faces:
    cv2.rectangle(out_img, (x, y), (x + w, y + h), (0, 0, 255), 2)
plt.figure(figsize=(12, 12))
plt.imshow(out_img)
# Annotate each detected face in team photo 1 with the model's prediction.
if len(faces) >= 2:
    new_img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    for (x, y, w, h) in faces:
        crop = cv2.resize(new_img[y:y + h, x:x + w], (128, 128))
        crop = np.reshape(crop, [1, 128, 128, 3]) / 255.0
        mask_result = best_model.predict(crop)
        idx = mask_result.argmax()
        caption = mask_label[idx] + " " + str(round(mask_result.max() * 100, 2)) + "%"
        cv2.putText(img=new_img, text=caption, org=(x, y - 10),
                    fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.6,
                    color=color_label[idx], thickness=2)
        cv2.rectangle(new_img, (x, y), (x + w, y + h), color_label[idx], 1)
    plt.figure(figsize=(12, 12))
    plt.imshow(new_img)
else:
    print("No. of faces detected is less than 2")
# Team photo 2: detect faces and annotate each with the model's prediction.
img = cv2.imread('/content/drive/MyDrive/Machine Learning/Team Photo/2.png')
# BUG FIX: use a real grayscale conversion for detection (IMREAD_GRAYSCALE is
# an imread flag, not a cvtColor code).
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_model.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=4)
if len(faces) >= 2:
    new_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # RGB copy for display
    for i in range(len(faces)):
        (x, y, w, h) = faces[i]
        crop = new_img[y:y + h, x:x + w]
        crop = cv2.resize(crop, (128, 128))
        # NOTE(review): /255.0 differs from the inception preprocessing used
        # in training — confirm the intended scaling.
        crop = np.reshape(crop, [1, 128, 128, 3]) / 255.0
        mask_result = best_model.predict(crop)
        cv2.putText(img=new_img, text=mask_label[mask_result.argmax()] + " " + str(round(mask_result.max() * 100, 2)) + "%",
                    org=(x, y - 10), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.6, color=color_label[mask_result.argmax()], thickness=2)
        cv2.rectangle(new_img, (x, y), (x + w, y + h), color_label[mask_result.argmax()], 1)
    plt.figure(figsize=(12, 12))
    plt.imshow(new_img)
else:
    print("No. of faces detected is less than 2")
# Team photo 3: detect faces and annotate each with the model's prediction.
img = cv2.imread('/content/drive/MyDrive/Machine Learning/Team Photo/3.png')
# BUG FIX: use a real grayscale conversion for detection (IMREAD_GRAYSCALE is
# an imread flag, not a cvtColor code).
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_model.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=8)
if len(faces) >= 2:
    new_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # RGB copy for display
    for i in range(len(faces)):
        (x, y, w, h) = faces[i]
        crop = new_img[y:y + h, x:x + w]
        crop = cv2.resize(crop, (128, 128))
        # NOTE(review): /255.0 differs from the inception preprocessing used
        # in training — confirm the intended scaling.
        crop = np.reshape(crop, [1, 128, 128, 3]) / 255.0
        mask_result = best_model.predict(crop)
        cv2.putText(img=new_img, text=mask_label[mask_result.argmax()] + " " + str(round(mask_result.max() * 100, 2)) + "%",
                    org=(x, y - 10), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.6, color=color_label[mask_result.argmax()], thickness=2)
        cv2.rectangle(new_img, (x, y), (x + w, y + h), color_label[mask_result.argmax()], 1)
    plt.figure(figsize=(12, 12))
    plt.imshow(new_img)
else:
    print("No. of faces detected is less than 2")